off-stack structure.
Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Xin B Li <xin.b.li@intel.com>
}
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
- struct mmio_op *mmio_opp)
+ struct hvm_io_op *pio_opp)
{
unsigned long old_eax;
int sign = p->df ? -1 : 1;
- if ( p->pdata_valid || (mmio_opp->flags & OVERLAP) )
+ if ( p->pdata_valid || (pio_opp->flags & OVERLAP) )
{
- if ( mmio_opp->flags & REPZ )
+ if ( pio_opp->flags & REPZ )
regs->ecx -= p->count;
if ( p->dir == IOREQ_READ )
{
regs->edi += sign * p->count * p->size;
- if ( mmio_opp->flags & OVERLAP )
+ if ( pio_opp->flags & OVERLAP )
{
unsigned long addr = regs->edi;
if (hvm_realmode(current))
}
}
-static void hvm_mmio_assist(struct vcpu *v, struct cpu_user_regs *regs,
- ioreq_t *p, struct mmio_op *mmio_opp)
+static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
+ struct hvm_io_op *mmio_opp)
{
int sign = p->df ? -1 : 1;
int size = -1, index = -1;
break;
case INSTR_XCHG:
- if (src & REGISTER) {
- index = operand_index(src);
- set_reg_value(size, index, 0, regs, p->u.data);
- } else {
- index = operand_index(dst);
- set_reg_value(size, index, 0, regs, p->u.data);
- }
- break;
+ if (src & REGISTER) {
+ index = operand_index(src);
+ set_reg_value(size, index, 0, regs, p->u.data);
+ } else {
+ index = operand_index(dst);
+ set_reg_value(size, index, 0, regs, p->u.data);
+ }
+ break;
}
-
- hvm_load_cpu_guest_regs(v, regs);
}
void hvm_io_assist(struct vcpu *v)
{
vcpu_iodata_t *vio;
ioreq_t *p;
- struct cpu_user_regs *regs = guest_cpu_user_regs();
- struct mmio_op *mmio_opp;
- struct cpu_user_regs *inst_decoder_regs;
+ struct cpu_user_regs *regs;
+ struct hvm_io_op *io_opp;
- mmio_opp = &v->arch.hvm_vcpu.mmio_op;
- inst_decoder_regs = mmio_opp->inst_decoder_regs;
+ io_opp = &v->arch.hvm_vcpu.io_op;
+ regs = &io_opp->io_context;
vio = get_vio(v->domain, v->vcpu_id);
- if (vio == 0) {
- HVM_DBG_LOG(DBG_LEVEL_1,
- "bad shared page: %lx", (unsigned long) vio);
- printf("bad shared page: %lx\n", (unsigned long) vio);
+ if ( vio == 0 ) {
+ printf("bad shared page: %lx\n", (unsigned long)vio);
domain_crash_synchronous();
}
p = &vio->vp_ioreq;
/* clear IO wait HVM flag */
- if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
- if (p->state == STATE_IORESP_READY) {
+ if ( test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) ) {
+ if ( p->state == STATE_IORESP_READY ) {
p->state = STATE_INVALID;
clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
- if (p->type == IOREQ_TYPE_PIO)
- hvm_pio_assist(regs, p, mmio_opp);
- else
- hvm_mmio_assist(v, regs, p, mmio_opp);
+ if ( p->type == IOREQ_TYPE_PIO )
+ hvm_pio_assist(regs, p, io_opp);
+ else {
+ hvm_mmio_assist(regs, p, io_opp);
+ hvm_load_cpu_guest_regs(v, regs);
+ }
+
+ /* Copy register changes back into current guest state. */
+ memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
}
/* else an interrupt send event raced us */
}
ioreq_t *p;
struct cpu_user_regs *regs;
- regs = current->arch.hvm_vcpu.mmio_op.inst_decoder_regs;
+ regs = &current->arch.hvm_vcpu.io_op.io_context;
vio = get_vio(v->domain, v->vcpu_id);
if (vio == NULL) {
}
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
- struct mmio_op *mmio_opp, struct cpu_user_regs *regs)
+ struct hvm_io_op *mmio_opp, struct cpu_user_regs *regs)
{
unsigned long value = 0;
int index, size_reg;
void handle_mmio(unsigned long va, unsigned long gpa)
{
unsigned long inst_addr;
- struct mmio_op *mmio_opp;
+ struct hvm_io_op *mmio_opp;
struct cpu_user_regs *regs;
struct instruction mmio_inst;
unsigned char inst[MAX_INST_LEN];
int i, realmode, ret, inst_len;
struct vcpu *v = current;
- mmio_opp = &v->arch.hvm_vcpu.mmio_op;
+ mmio_opp = &v->arch.hvm_vcpu.io_op;
+ regs = &mmio_opp->io_context;
+
+ /* Copy current guest state into io instruction state structure. */
+ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- regs = mmio_opp->inst_decoder_regs;
hvm_store_cpu_guest_regs(v, regs, NULL);
if ((inst_len = hvm_instruction_length(v)) <= 0) {
unsigned long operand = mmio_inst.operand[0];
value = get_reg_value(operand_size(operand),
operand_index(operand), 0,
- mmio_opp->inst_decoder_regs);
+ regs);
/* send the request and wait for the value */
send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
mmio_inst.op_size, value, IOREQ_WRITE, 0);
unsigned long operand = mmio_inst.operand[1];
value = get_reg_value(operand_size(operand),
operand_index(operand), 0,
- mmio_opp->inst_decoder_regs);
+ regs);
/* send the request and wait for the value */
send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
mmio_inst.op_size, value, IOREQ_WRITE, 0);
}
-static void svm_io_instruction(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_io_instruction(struct vcpu *v)
{
- struct mmio_op *mmio_opp;
+ struct cpu_user_regs *regs;
+ struct hvm_io_op *pio_opp;
unsigned int port;
unsigned int size, dir;
ioio_info_t info;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
ASSERT(vmcb);
- mmio_opp = &current->arch.hvm_vcpu.mmio_op;
- mmio_opp->instr = INSTR_PIO;
- mmio_opp->flags = 0;
+ pio_opp = &current->arch.hvm_vcpu.io_op;
+ pio_opp->instr = INSTR_PIO;
+ pio_opp->flags = 0;
+
+ regs = &pio_opp->io_context;
+
+ /* Copy current guest state into io instruction state structure. */
+ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
info.bytes = vmcb->exitinfo1;
/* "rep" prefix */
if (info.fields.rep)
{
- mmio_opp->flags |= REPZ;
+ pio_opp->flags |= REPZ;
}
else
{
{
unsigned long value = 0;
- mmio_opp->flags |= OVERLAP;
+ pio_opp->flags |= OVERLAP;
if (dir == IOREQ_WRITE)
hvm_copy(&value, addr, size, HVM_COPY_IN);
(unsigned long)regs.ecx, (unsigned long)regs.edx,
(unsigned long)regs.esi, (unsigned long)regs.edi);
- v->arch.hvm_vcpu.mmio_op.inst_decoder_regs = &regs;
-
-//printk("PF1\n");
if (!(error = svm_do_page_fault(va, &regs)))
{
/* Inject #PG using Interruption-Information Fields */
break;
case VMEXIT_IOIO:
- svm_io_instruction(v, &regs);
+ svm_io_instruction(v);
break;
case VMEXIT_MSR:
unsigned long count, int size, long value,
int dir, int pvalid);
-static void vmx_io_instruction(struct cpu_user_regs *regs,
- unsigned long exit_qualification, unsigned long inst_len)
+static void vmx_io_instruction(unsigned long exit_qualification,
+ unsigned long inst_len)
{
- struct mmio_op *mmio_opp;
+ struct cpu_user_regs *regs;
+ struct hvm_io_op *pio_opp;
unsigned long eip, cs, eflags;
unsigned long port, size, dir;
int vm86;
- mmio_opp = &current->arch.hvm_vcpu.mmio_op;
- mmio_opp->instr = INSTR_PIO;
- mmio_opp->flags = 0;
+ pio_opp = &current->arch.hvm_vcpu.io_op;
+ pio_opp->instr = INSTR_PIO;
+ pio_opp->flags = 0;
+
+ regs = &pio_opp->io_context;
+
+ /* Copy current guest state into io instruction state structure. */
+ memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
__vmread(GUEST_RIP, &eip);
__vmread(GUEST_CS_SELECTOR, &cs);
addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
- mmio_opp->flags |= REPZ;
+ pio_opp->flags |= REPZ;
count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
}
if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
unsigned long value = 0;
- mmio_opp->flags |= OVERLAP;
+ pio_opp->flags |= OVERLAP;
if (dir == IOREQ_WRITE)
hvm_copy(&value, addr, size, HVM_COPY_IN);
send_pio_req(regs, port, 1, size, value, dir, 0);
(unsigned long)regs.eax, (unsigned long)regs.ebx,
(unsigned long)regs.ecx, (unsigned long)regs.edx,
(unsigned long)regs.esi, (unsigned long)regs.edi);
- v->arch.hvm_vcpu.mmio_op.inst_decoder_regs = &regs;
if (!(error = vmx_do_page_fault(va, &regs))) {
/*
case EXIT_REASON_IO_INSTRUCTION:
__vmread(EXIT_QUALIFICATION, &exit_qualification);
__get_instruction_length(inst_len);
- vmx_io_instruction(&regs, exit_qualification, inst_len);
+ vmx_io_instruction(exit_qualification, inst_len);
TRACE_VMEXIT(4,exit_qualification);
break;
case EXIT_REASON_MSR_READ:
#define MAX_INST_LEN 15 /* Maximum instruction length = 15 bytes */
-struct mmio_op {
+struct hvm_io_op {
int flags;
int instr; /* instruction */
unsigned long operand[2]; /* operands */
unsigned long immediate; /* immediate portion */
- struct cpu_user_regs *inst_decoder_regs; /* current context */
+ struct cpu_user_regs io_context; /* current context */
};
#define MAX_IO_HANDLER 8
#define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI 1
struct hvm_vcpu {
- unsigned long ioflags;
- struct mmio_op mmio_op;
- struct vlapic *vlapic;
- s64 cache_tsc_offset;
- u64 guest_time;
+ unsigned long ioflags;
+ struct hvm_io_op io_op;
+ struct vlapic *vlapic;
+ s64 cache_tsc_offset;
+ u64 guest_time;
/* For AP startup */
- unsigned long init_sipi_sipi_state;
+ unsigned long init_sipi_sipi_state;
/* Flags */
- int flag_dr_dirty;
+ int flag_dr_dirty;
union {
struct arch_vmx_struct vmx;
} u;
};
-#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
+#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
+
+#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
#endif /* __ASM_X86_HVM_VCPU_H__ */